return ret;
}
-static int get_cpumap_size(xc_interface *xch)
-{
- return (xc_get_max_cpus(xch) + 7) / 8;
-}
-
int xc_cpupool_create(xc_interface *xch,
uint32_t *ppoolid,
uint32_t sched_id)
int err = 0;
xc_cpupoolinfo_t *info = NULL;
int local_size;
- int cpumap_size;
- int size;
DECLARE_SYSCTL;
DECLARE_HYPERCALL_BUFFER(uint8_t, local);
- local_size = get_cpumap_size(xch);
+ local_size = xc_get_cpumap_size(xch);
if (!local_size)
{
PERROR("Could not get number of cpus");
return NULL;
}
- cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
- size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap);
-
sysctl.cmd = XEN_SYSCTL_cpupool_op;
sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
sysctl.u.cpupool_op.cpupool_id = poolid;
if ( err < 0 )
goto out;
- info = malloc(size);
+ info = calloc(1, sizeof(xc_cpupoolinfo_t));
if ( !info )
goto out;
- memset(info, 0, size);
- info->cpumap_size = local_size * 8;
- info->cpumap = (uint64_t *)(info + 1);
-
+ info->cpumap = xc_cpumap_alloc(xch);
+ if (!info->cpumap) {
+ free(info);
+ /* Reset so the out: path returns NULL rather than the freed pointer
+ * (out: ends with "return info;"). */
+ info = NULL;
+ goto out;
+ }
info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
info->sched_id = sysctl.u.cpupool_op.sched_id;
info->n_dom = sysctl.u.cpupool_op.n_dom;
- bitmap_byte_to_64(info->cpumap, local, local_size * 8);
+ memcpy(info->cpumap, local, local_size);
out:
xc_hypercall_buffer_free(xch, local);
return info;
}
+/* Free a cpupool info structure obtained from xc_cpupool_getinfo():
+ * releases the embedded cpumap first, then the structure itself.
+ * NOTE(review): assumes info is non-NULL — callers must not pass NULL. */
+void xc_cpupool_infofree(xc_interface *xch,
+ xc_cpupoolinfo_t *info)
+{
+ free(info->cpumap);
+ free(info);
+}
+
int xc_cpupool_addcpu(xc_interface *xch,
uint32_t poolid,
int cpu)
return do_sysctl_save(xch, &sysctl);
}
-uint64_t * xc_cpupool_freeinfo(xc_interface *xch,
- int *cpusize)
+xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch)
{
int err = -1;
- uint64_t *cpumap = NULL;
+ xc_cpumap_t cpumap = NULL;
+ int mapsize;
DECLARE_SYSCTL;
DECLARE_HYPERCALL_BUFFER(uint8_t, local);
- *cpusize = get_cpumap_size(xch);
- if (*cpusize == 0)
+ mapsize = xc_get_cpumap_size(xch);
+ if (mapsize == 0)
return NULL;
- local = xc_hypercall_buffer_alloc(xch, local, *cpusize);
+ local = xc_hypercall_buffer_alloc(xch, local, mapsize);
if ( local == NULL ) {
PERROR("Could not allocate locked memory for xc_cpupool_freeinfo");
return NULL;
sysctl.cmd = XEN_SYSCTL_cpupool_op;
sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
- sysctl.u.cpupool_op.cpumap.nr_cpus = *cpusize * 8;
+ sysctl.u.cpupool_op.cpumap.nr_cpus = mapsize * 8;
err = do_sysctl_save(xch, &sysctl);
if ( err < 0 )
goto out;
- cpumap = calloc((*cpusize + sizeof(*cpumap) - 1) / sizeof(*cpumap), sizeof(*cpumap));
+ cpumap = xc_cpumap_alloc(xch);
if (cpumap == NULL)
goto out;
- bitmap_byte_to_64(cpumap, local, *cpusize * 8);
+ memcpy(cpumap, local, mapsize);
out:
xc_hypercall_buffer_free(xch, local);
int xc_vcpu_setaffinity(xc_interface *xch,
uint32_t domid,
int vcpu,
- uint64_t *cpumap, int cpusize)
+ xc_cpumap_t cpumap)
{
DECLARE_DOMCTL;
DECLARE_HYPERCALL_BUFFER(uint8_t, local);
int ret = -1;
+ int cpusize;
+
+ cpusize = xc_get_cpumap_size(xch);
+ if (!cpusize)
+ {
+ PERROR("Could not get number of cpus");
+ goto out;
+ }
local = xc_hypercall_buffer_alloc(xch, local, cpusize);
if ( local == NULL )
domctl.domain = (domid_t)domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
- bitmap_64_to_byte(local, cpumap, cpusize * 8);
+ memcpy(local, cpumap, cpusize);
set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
int xc_vcpu_getaffinity(xc_interface *xch,
uint32_t domid,
int vcpu,
- uint64_t *cpumap, int cpusize)
+ xc_cpumap_t cpumap)
{
DECLARE_DOMCTL;
DECLARE_HYPERCALL_BUFFER(uint8_t, local);
int ret = -1;
+ int cpusize;
+
+ cpusize = xc_get_cpumap_size(xch);
+ if (!cpusize)
+ {
+ PERROR("Could not get number of cpus");
+ goto out;
+ }
local = xc_hypercall_buffer_alloc(xch, local, cpusize);
- if(local == NULL)
+ if (local == NULL)
{
PERROR("Could not allocate memory for getvcpuaffinity domctl hypercall");
goto out;
ret = do_domctl(xch, &domctl);
- bitmap_byte_to_64(cpumap, local, cpusize * 8);
+ memcpy(cpumap, local, cpusize);
xc_hypercall_buffer_free(xch, local);
out:
return max_cpus;
}
+/* Return the size in bytes of a cpumap large enough to hold one bit per
+ * potential cpu; 0 if the maximum cpu count cannot be obtained. */
+int xc_get_cpumap_size(xc_interface *xch)
+{
+ return (xc_get_max_cpus(xch) + 7) / 8;
+}
+
+/* Allocate a zero-filled cpumap sized via xc_get_cpumap_size().
+ * Returns NULL on failure (cpu count unavailable or out of memory).
+ * Ownership passes to the caller, who releases it with free(). */
+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
+{
+ int sz;
+
+ sz = xc_get_cpumap_size(xch);
+ if (sz == 0)
+ return NULL;
+ return calloc(1, sz);
+}
+
int xc_readconsolering(xc_interface *xch,
char *buffer,
unsigned int *pnr_chars,
void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
#define xc_hypercall_buffer_free_pages(_xch, _name, _nr) xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
+/*
+ * CPUMAP handling
+ */
+typedef uint8_t *xc_cpumap_t;
+
+/* return maximum number of cpus the hypervisor supports */
+int xc_get_max_cpus(xc_interface *xch);
+
+/* return array size for cpumap */
+int xc_get_cpumap_size(xc_interface *xch);
+
+/* allocate a cpumap */
+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
+
/*
* DOMAIN DEBUGGING FUNCTIONS
*/
} start_info_any_t;
-/* return maximum number of cpus the hypervisor supports */
-int xc_get_max_cpus(xc_interface *xch);
-
int xc_domain_create(xc_interface *xch,
uint32_t ssidref,
xen_domain_handle_t handle,
int xc_vcpu_setaffinity(xc_interface *xch,
uint32_t domid,
int vcpu,
- uint64_t *cpumap,
- int cpusize);
+ xc_cpumap_t cpumap);
int xc_vcpu_getaffinity(xc_interface *xch,
uint32_t domid,
int vcpu,
- uint64_t *cpumap,
- int cpusize);
+ xc_cpumap_t cpumap);
/**
* This function will return information about one or more domains. It is
uint32_t cpupool_id;
uint32_t sched_id;
uint32_t n_dom;
- uint32_t cpumap_size; /* max number of cpus in map */
- uint64_t *cpumap;
+ xc_cpumap_t cpumap;
} xc_cpupoolinfo_t;
/**
* starting at the given id.
* @parm xc_handle a handle to an open hypervisor interface
* @parm poolid lowest id for which info is returned
- * return cpupool info ptr (obtained by malloc)
+ * return cpupool info ptr (to be freed via xc_cpupool_infofree)
*/
xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
uint32_t poolid);
+/**
+ * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm info area to free
+ */
+void xc_cpupool_infofree(xc_interface *xch,
+ xc_cpupoolinfo_t *info);
+
/**
* Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
*
* Return map of cpus not in any cpupool.
*
* @parm xc_handle a handle to an open hypervisor interface
- * @parm cpusize where to store array size in bytes
* return cpumap array on success, NULL else
*/
-uint64_t *xc_cpupool_freeinfo(xc_interface *xch,
- int *cpusize);
+xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
/*
libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool)
{
libxl_cpupoolinfo *ptr, *tmp;
- int i, m, ncpu;
+ int i;
xc_cpupoolinfo_t *info;
uint32_t poolid;
ptr = NULL;
- ncpu = xc_get_max_cpus(ctx->xch);
- if (!ncpu) {
- LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting max cpu number");
- return NULL;
- }
poolid = 0;
for (i = 0;; i++) {
if (!tmp) {
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
free(ptr);
+ xc_cpupool_infofree(ctx->xch, info);
return NULL;
}
ptr = tmp;
ptr[i].poolid = info->cpupool_id;
ptr[i].sched_id = info->sched_id;
ptr[i].n_dom = info->n_dom;
- if (libxl_cpumap_alloc(&ptr[i].cpumap, ncpu))
+ if (libxl_cpumap_alloc(ctx, &ptr[i].cpumap)) {
+ xc_cpupool_infofree(ctx->xch, info);
break;
- for (m = 0; m < ptr[i].cpumap.size / sizeof(*ptr[i].cpumap.map); m++)
- ptr[i].cpumap.map[m] = (info->cpumap_size > (m * sizeof(*ptr[i].cpumap.map))) ?
- info->cpumap[m] : 0;
+ }
+ memcpy(ptr[i].cpumap.map, info->cpumap, ptr[i].cpumap.size);
poolid = info->cpupool_id + 1;
- free(info);
+ xc_cpupool_infofree(ctx->xch, info);
}
*nb_pool = i;
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting infolist");
return NULL;
}
- *nrcpus = xc_get_max_cpus(ctx->xch);
+ *nrcpus = libxl_get_max_cpus(ctx);
ret = ptr = calloc(domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
if (!ptr) {
return NULL;
}
for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
- if (libxl_cpumap_alloc(&ptr->cpumap, *nrcpus)) {
+ if (libxl_cpumap_alloc(ctx, &ptr->cpumap)) {
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpumap");
return NULL;
}
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu info");
return NULL;
}
- if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
- ptr->cpumap.map, ((*nrcpus) + 7) / 8) == -1) {
+ if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, ptr->cpumap.map) == -1) {
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
return NULL;
}
}
int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
- uint64_t *cpumap, int nrcpus)
+ libxl_cpumap *cpumap)
{
- if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap, (nrcpus + 7) / 8)) {
+ if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map)) {
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity");
return ERROR_FAIL;
}
{
int ncpus;
- cpumap->map = xc_cpupool_freeinfo(ctx->xch, &ncpus);
+ ncpus = libxl_get_max_cpus(ctx);
+ if (ncpus == 0)
+ return ERROR_FAIL;
+
+ cpumap->map = xc_cpupool_freeinfo(ctx->xch);
if (cpumap->map == NULL)
return ERROR_FAIL;
return ERROR_FAIL;
}
- for (i = 0; i < cpumap.size * 8; i++)
- if (cpumap.map[i / 64] & (1L << (i % 64))) {
+ libxl_for_each_cpu(i, cpumap)
+ if (libxl_cpumap_test(&cpumap, i)) {
rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
if (rc) {
LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
int rc, i;
xc_cpupoolinfo_t *info;
xs_transaction_t t;
+ libxl_cpumap cpumap;
info = xc_cpupool_getinfo(ctx->xch, poolid);
if (info == NULL)
if ((info->cpupool_id != poolid) || (info->n_dom))
goto out;
- for (i = 0; i < info->cpumap_size; i++)
- if (info->cpumap[i / 64] & (1L << (i % 64))) {
+ rc = ERROR_NOMEM;
+ if (libxl_cpumap_alloc(ctx, &cpumap))
+ goto out;
+
+ memcpy(cpumap.map, info->cpumap, cpumap.size);
+ libxl_for_each_cpu(i, cpumap)
+ if (libxl_cpumap_test(&cpumap, i)) {
rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
if (rc) {
LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
"Error removing cpu from cpupool");
rc = ERROR_FAIL;
- goto out;
+ goto out1;
}
}
if (rc) {
LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, "Could not destroy cpupool");
rc = ERROR_FAIL;
- goto out;
+ goto out1;
}
for (;;) {
rc = 0;
+out1:
+ libxl_cpumap_destroy(&cpumap);
out:
- free(info);
+ xc_cpupool_infofree(ctx->xch, info);
return rc;
}
typedef struct {
uint32_t size; /* number of bytes in map */
- uint64_t *map;
+ uint8_t *map;
} libxl_cpumap;
void libxl_cpumap_destroy(libxl_cpumap *map);
libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
int *nb_vcpu, int *nrcpus);
int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
- uint64_t *cpumap, int nrcpus);
+ libxl_cpumap *cpumap);
int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, uint32_t bitmask);
int libxl_get_sched_id(libxl_ctx *ctx);
return rc;
}
-int libxl_cpumap_alloc(libxl_cpumap *cpumap, int max_cpus)
+int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap)
{
- int elems;
+ int max_cpus;
+ int sz;
- elems = (max_cpus + 63) / 64;
- cpumap->map = calloc(elems, sizeof(*cpumap->map));
+ max_cpus = libxl_get_max_cpus(ctx);
+ if (max_cpus == 0)
+ return ERROR_FAIL;
+
+ sz = (max_cpus + 7) / 8;
+ cpumap->map = calloc(sz, sizeof(*cpumap->map));
if (!cpumap->map)
return ERROR_NOMEM;
- cpumap->size = elems * 8; /* size in bytes */
+ cpumap->size = sz;
return 0;
}
{
if (cpu >= cpumap->size * 8)
return 0;
- return (cpumap->map[cpu / 64] & (1L << (cpu & 63))) ? 1 : 0;
+ return (cpumap->map[cpu / 8] & (1 << (cpu & 7))) ? 1 : 0;
}
void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu)
{
if (cpu >= cpumap->size * 8)
return;
- cpumap->map[cpu / 64] |= 1L << (cpu & 63);
+ cpumap->map[cpu / 8] |= 1 << (cpu & 7);
}
void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu)
{
if (cpu >= cpumap->size * 8)
return;
- cpumap->map[cpu / 64] &= ~(1L << (cpu & 63));
+ cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
}
int libxl_get_max_cpus(libxl_ctx *ctx)
* return -1 if there are an error */
int libxl_check_device_model_version(libxl_ctx *ctx, char *path);
-int libxl_cpumap_alloc(libxl_cpumap *cpumap, int max_cpus);
+int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap);
int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
+/* Iterate var over every cpu index representable in the map
+ * (map.size is in bytes, hence 8 bits per byte). */
+#define libxl_for_each_cpu(var, map) for (var = 0; var < (map).size * 8; var++)
+
#endif
uint32_t nr_cpus)
{
int i, l;
- uint64_t *cpumap;
- uint64_t pcpumap;
+ uint8_t *cpumap;
+ uint8_t pcpumap;
char *domname;
/* NAME ID VCPU */
/* TIM */
printf("%9.1f ", ((float)vcpuinfo->vcpu_time / 1e9));
/* CPU AFFINITY */
- pcpumap = nr_cpus > 64 ? (uint64_t)-1 : ((1ULL << nr_cpus) - 1);
+ pcpumap = nr_cpus > 8 ? (uint8_t)-1 : ((1 << nr_cpus) - 1);
for (cpumap = vcpuinfo->cpumap.map; nr_cpus; ++cpumap) {
if (*cpumap < pcpumap) {
break;
}
- if (nr_cpus > 64) {
+ if (nr_cpus > 8) {
pcpumap = -1;
- nr_cpus -= 64;
+ nr_cpus -= 8;
} else {
pcpumap = ((1 << nr_cpus) - 1);
nr_cpus = 0;
}
}
printf("\n");
- nr_cpus = nr_cpus > 64 ? nr_cpus - 64 : 0;
+ nr_cpus = nr_cpus > 8 ? nr_cpus - 8 : 0;
}
}
}
static void vcpupin(char *d, const char *vcpu, char *cpu)
{
libxl_vcpuinfo *vcpuinfo;
- uint64_t *cpumap = NULL;
+ libxl_cpumap cpumap;
uint32_t vcpuid, cpuida, cpuidb;
char *endptr, *toka, *tokb;
- int i, nb_vcpu, cpusize, cpumapsize;
+ int i, nb_vcpu;
vcpuid = strtoul(vcpu, &endptr, 10);
if (vcpu == endptr) {
find_domain(d);
- if ((cpusize = libxl_get_max_cpus(&ctx)) == 0) {
- fprintf(stderr, "libxl_get_max_cpus failed.\n");
- goto vcpupin_out1;
- }
- cpumapsize = (cpusize + sizeof (uint64_t) - 1) / sizeof (uint64_t);
-
- cpumap = calloc(cpumapsize, sizeof (uint64_t));
- if (!cpumap) {
- goto vcpupin_out1;
+ if (libxl_cpumap_alloc(&ctx, &cpumap)) {
+ goto vcpupin_out;
}
if (strcmp(cpu, "all")) {
for (toka = strtok(cpu, ","), i = 0; toka; toka = strtok(NULL, ","), ++i) {
cpuida = strtoul(toka, &endptr, 10);
if (toka == endptr) {
fprintf(stderr, "Error: Invalid argument.\n");
- goto vcpupin_out;
+ goto vcpupin_out1;
}
if (*endptr == '-') {
tokb = endptr + 1;
cpuidb = strtoul(tokb, &endptr, 10);
if ((tokb == endptr) || (cpuida > cpuidb)) {
fprintf(stderr, "Error: Invalid argument.\n");
- goto vcpupin_out;
+ goto vcpupin_out1;
}
while (cpuida <= cpuidb) {
- cpumap[cpuida / 64] |= (1 << (cpuida % 64));
+ libxl_cpumap_set(&cpumap, cpuida);
++cpuida;
}
} else {
- cpumap[cpuida / 64] |= (1 << (cpuida % 64));
+ libxl_cpumap_set(&cpumap, cpuida);
}
}
}
else {
- memset(cpumap, -1, sizeof (uint64_t) * cpumapsize);
+ memset(cpumap.map, -1, cpumap.size);
}
if (vcpuid != -1) {
- if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid,
- cpumap, cpusize) == -1) {
+ if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid, &cpumap) == -1) {
fprintf(stderr, "Could not set affinity for vcpu `%u'.\n", vcpuid);
}
}
else {
if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &i))) {
fprintf(stderr, "libxl_list_vcpu failed.\n");
- goto vcpupin_out;
+ goto vcpupin_out1;
}
for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
- if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid,
- cpumap, cpusize) == -1) {
+ if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid, &cpumap) == -1) {
fprintf(stderr, "libxl_set_vcpuaffinity failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
}
}
}
vcpupin_out1:
- free(cpumap);
+ libxl_cpumap_destroy(&cpumap);
vcpupin_out:
;
}
printf("free_memory : %"PRIu64"\n", info.free_pages / i);
}
if (!libxl_get_freecpus(&ctx, &cpumap)) {
- for (i = 0; i < cpumap.size * 8; i++)
+ libxl_for_each_cpu(i, cpumap)
if (libxl_cpumap_test(&cpumap, i))
n++;
printf("free_cpus : %d\n", n);
fprintf(stderr, "libxl_get_freecpus failed\n");
return -ERROR_FAIL;
}
- if (libxl_cpumap_alloc(&cpumap, freemap.size * 8)) {
+ if (libxl_cpumap_alloc(&ctx, &cpumap)) {
fprintf(stderr, "Failed to allocate cpumap\n");
return -ERROR_FAIL;
}
} else {
n_cpus = 1;
n = 0;
- for (i = 0; i < freemap.size * 8; i++)
+ libxl_for_each_cpu(i, freemap)
if (libxl_cpumap_test(&freemap, i)) {
n++;
libxl_cpumap_set(&cpumap, i);
printf("%-19s", name);
free(name);
n = 0;
- for (c = 0; c < poolinfo[p].cpumap.size * 8; c++)
- if (poolinfo[p].cpumap.map[c / 64] & (1L << (c % 64))) {
+ libxl_for_each_cpu(c, poolinfo[p].cpumap)
+ if (libxl_cpumap_test(&poolinfo[p].cpumap, c)) {
if (n && opt_cpus) printf(",");
if (opt_cpus) printf("%d", c);
n++;
{
uint32_t dom;
int vcpu = 0, i;
- uint64_t *cpumap;
+ xc_cpumap_t cpumap;
PyObject *cpulist = NULL;
- int nr_cpus, size;
- uint64_t cpumap_size = sizeof(*cpumap);
static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
&dom, &vcpu, &cpulist) )
return NULL;
- nr_cpus = xc_get_max_cpus(self->xc_handle);
- if ( nr_cpus == 0 )
- return pyxc_error_to_exception(self->xc_handle);
-
- size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
- cpumap = malloc(cpumap_size * size);
+ cpumap = xc_cpumap_alloc(self->xc_handle);
if(cpumap == NULL)
return pyxc_error_to_exception(self->xc_handle);
if ( (cpulist != NULL) && PyList_Check(cpulist) )
{
- for ( i = 0; i < size; i++)
- {
- cpumap[i] = 0ULL;
- }
for ( i = 0; i < PyList_Size(cpulist); i++ )
{
long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
- cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));
+ cpumap[cpu / 8] |= 1 << (cpu % 8);
}
}
- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+ if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
{
free(cpumap);
return pyxc_error_to_exception(self->xc_handle);
uint32_t dom, vcpu = 0;
xc_vcpuinfo_t info;
int rc, i;
- uint64_t *cpumap;
- int nr_cpus, size;
- uint64_t cpumap_size = sizeof(*cpumap);
+ xc_cpumap_t cpumap;
+ int nr_cpus;
static char *kwd_list[] = { "domid", "vcpu", NULL };
if ( rc < 0 )
return pyxc_error_to_exception(self->xc_handle);
- size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
- if((cpumap = malloc(cpumap_size * size)) == NULL)
- return pyxc_error_to_exception(self->xc_handle);
- memset(cpumap, 0, cpumap_size * size);
+ cpumap = xc_cpumap_alloc(self->xc_handle);
+ if(cpumap == NULL)
+ return pyxc_error_to_exception(self->xc_handle);
- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
+ rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap);
if ( rc < 0 )
{
free(cpumap);
cpulist = PyList_New(0);
for ( i = 0; i < nr_cpus; i++ )
{
- if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
+ if (*(cpumap + i / 8) & 1 ) {
PyObject *pyint = PyInt_FromLong(i);
PyList_Append(cpulist, pyint);
Py_DECREF(pyint);
}
- cpumap[i / (cpumap_size * 8)] >>= 1;
+ cpumap[i / 8] >>= 1;
}
PyDict_SetItemString(info_dict, "cpumap", cpulist);
Py_DECREF(cpulist);
return zero;
}
-static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
+static PyObject *cpumap_to_cpulist(XcObject *self, xc_cpumap_t cpumap)
{
PyObject *cpulist = NULL;
int i;
+ int nr_cpus;
+
+ nr_cpus = xc_get_max_cpus(self->xc_handle);
+ if ( nr_cpus == 0 )
+ return pyxc_error_to_exception(self->xc_handle);
cpulist = PyList_New(0);
- for ( i = 0; i < cpusize; i++ )
+ for ( i = 0; i < nr_cpus; i++ )
{
- if ( *cpumap & (1L << (i % 64)) )
+ if ( *cpumap & (1 << (i % 8)) )
{
PyObject* pyint = PyInt_FromLong(i);
PyList_Append(cpulist, pyint);
Py_DECREF(pyint);
}
- if ( (i % 64) == 63 )
+ if ( (i % 8) == 7 )
cpumap++;
}
return cpulist;
"cpupool", (int)info->cpupool_id,
"sched", info->sched_id,
"n_dom", info->n_dom,
- "cpulist", cpumap_to_cpulist(info->cpumap,
- info->cpumap_size));
+ "cpulist", cpumap_to_cpulist(self, info->cpumap));
pool = info->cpupool_id + 1;
- free(info);
+ xc_cpupool_infofree(self->xc_handle, info);
if ( info_dict == NULL )
{
static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
{
- uint64_t *cpumap;
- int mapsize;
+ xc_cpumap_t cpumap;
PyObject *info = NULL;
- cpumap = xc_cpupool_freeinfo(self->xc_handle, &mapsize);
+ cpumap = xc_cpupool_freeinfo(self->xc_handle);
if (!cpumap)
return pyxc_error_to_exception(self->xc_handle);
- info = cpumap_to_cpulist(cpumap, mapsize * 8);
+ info = cpumap_to_cpulist(self, cpumap);
free(cpumap);